use return value of domain_adjust_tot_pages() where feasible
author Jan Beulich <jbeulich@suse.com>
Tue, 3 Dec 2013 11:41:54 +0000 (12:41 +0100)
committer Jan Beulich <jbeulich@suse.com>
Tue, 3 Dec 2013 11:41:54 +0000 (12:41 +0100)
This is generally cheaper than re-reading ->tot_pages.
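
For reference, domain_adjust_tot_pages() hands back the updated count,
roughly along the lines of the sketch below (simplified - the real
function in common/page_alloc.c also deals with claim accounting):

    /* Simplified sketch; caller must hold d->page_alloc_lock. */
    unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
    {
        ASSERT(spin_is_locked(&d->page_alloc_lock));
        d->tot_pages += pages;
        return d->tot_pages;
    }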

While doing so I also noticed an improper use (lacking error handling)
of get_domain(), as well as missing ->is_dying checks, in the memory
sharing code, which the patch fixes at once. In the course of doing
this I further noticed other error paths there pointlessly calling
put_page() et al with ->page_alloc_lock still held; that ordering is
being reversed as well.
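
For reference, the two domain reference primitives differ in their
failure behavior (prototype sketch only; the real definitions live in
xen/include/xen/sched.h):

    /* Fails (returns 0) once the domain has been marked destroyed. */
    int get_domain(struct domain *d);

    /* Caller must know the domain is alive; cannot fail. */
    void get_knownalive_domain(struct domain *d);

Ignoring get_domain()'s return value is therefore unsafe, whereas
get_knownalive_domain() is fine in page_make_private() once ->is_dying
has been checked under ->page_alloc_lock. Moving the put_page() et al
calls past the spin_unlock() additionally keeps any freeing they may
trigger out of the locked region.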

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Acked-by: Keir Fraser <keir@xen.org>
xen/arch/x86/mm/mem_sharing.c
xen/common/memory.c
xen/common/page_alloc.c

xen/arch/x86/mm/mem_sharing.c
index 1e89f6c4fcd92ce0d55347a152ebf1947f04dd47..c94856b911ec3c9809f110c1019366330eb8335f 100644 (file)
@@ -611,9 +611,16 @@ static int page_make_sharable(struct domain *d,
                        struct page_info *page, 
                        int expected_refcnt)
 {
-    int drop_dom_ref;
+    bool_t drop_dom_ref;
+
     spin_lock(&d->page_alloc_lock);
 
+    if ( d->is_dying )
+    {
+        spin_unlock(&d->page_alloc_lock);
+        return -EBUSY;
+    }
+
     /* Change page type and count atomically */
     if ( !get_page_and_type(page, d, PGT_shared_page) )
     {
@@ -624,8 +631,8 @@ static int page_make_sharable(struct domain *d,
     /* Check it wasn't already sharable and undo if it was */
     if ( (page->u.inuse.type_info & PGT_count_mask) != 1 )
     {
-        put_page_and_type(page);
         spin_unlock(&d->page_alloc_lock);
+        put_page_and_type(page);
         return -EEXIST;
     }
 
@@ -633,15 +640,14 @@ static int page_make_sharable(struct domain *d,
      * the second from get_page_and_type at the top of this function */
     if ( page->count_info != (PGC_allocated | (2 + expected_refcnt)) )
     {
+        spin_unlock(&d->page_alloc_lock);
         /* Return type count back to zero */
         put_page_and_type(page);
-        spin_unlock(&d->page_alloc_lock);
         return -E2BIG;
     }
 
     page_set_owner(page, dom_cow);
-    domain_adjust_tot_pages(d, -1);
-    drop_dom_ref = (d->tot_pages == 0);
+    drop_dom_ref = !domain_adjust_tot_pages(d, -1);
     page_list_del(page, &d->page_list);
     spin_unlock(&d->page_alloc_lock);
 
@@ -659,6 +665,13 @@ static int page_make_private(struct domain *d, struct page_info *page)
     
     spin_lock(&d->page_alloc_lock);
 
+    if ( d->is_dying )
+    {
+        spin_unlock(&d->page_alloc_lock);
+        put_page(page);
+        return -EBUSY;
+    }
+
     /* We can only change the type if count is one */
     /* Because we are locking pages individually, we need to drop
      * the lock here, while the page is typed. We cannot risk the 
@@ -666,8 +679,8 @@ static int page_make_private(struct domain *d, struct page_info *page)
     expected_type = (PGT_shared_page | PGT_validated | PGT_locked | 2);
     if ( page->u.inuse.type_info != expected_type )
     {
-        put_page(page);
         spin_unlock(&d->page_alloc_lock);
+        put_page(page);
         return -EEXIST;
     }
 
@@ -682,7 +695,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
     page_set_owner(page, d);
 
     if ( domain_adjust_tot_pages(d, 1) == 1 )
-        get_domain(d);
+        get_knownalive_domain(d);
     page_list_add_tail(page, &d->page_list);
     spin_unlock(&d->page_alloc_lock);
 
xen/common/memory.c
index 50b740f752da6199cfcdfb5561c3d2ab8e760ce9..eb7b72b60f52fcb8146bb3fc5ffbc4acc308723e 100644 (file)
@@ -475,8 +475,8 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
                              (j * (1UL << exch.out.extent_order)));
 
                 spin_lock(&d->page_alloc_lock);
-                domain_adjust_tot_pages(d, -dec_count);
-                drop_dom_ref = (dec_count && !d->tot_pages);
+                drop_dom_ref = (dec_count &&
+                                !domain_adjust_tot_pages(d, -dec_count));
                 spin_unlock(&d->page_alloc_lock);
 
                 if ( drop_dom_ref )
xen/common/page_alloc.c
index 9497623d4f53863945ca68efb32a1d63e9f3c799..8002bd25177cfbe215df24aed993786bd9b6cfc9 100644 (file)
@@ -1519,8 +1519,9 @@ struct page_info *alloc_domheap_pages(
 
 void free_domheap_pages(struct page_info *pg, unsigned int order)
 {
-    int            i, drop_dom_ref;
     struct domain *d = page_get_owner(pg);
+    unsigned int i;
+    bool_t drop_dom_ref;
 
     ASSERT(!in_irq());
 
@@ -1548,8 +1549,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
             page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
         }
 
-        domain_adjust_tot_pages(d, -(1 << order));
-        drop_dom_ref = (d->tot_pages == 0);
+        drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));
 
         spin_unlock_recursive(&d->page_alloc_lock);